{
"output_dir": "ckpts/discrim/contrastive_models/gen.llama-7b/flan_t5_large_multiarith-clean_100K/",
"overwrite_output_dir": false,
"do_train": false,
"do_eval": true,
"do_predict": false,
"evaluation_strategy": "steps",
"prediction_loss_only": true,
"per_device_train_batch_size": 8,
"per_device_eval_batch_size": 8,
"per_gpu_train_batch_size": null,
"per_gpu_eval_batch_size": null,
"gradient_accumulation_steps": 2,
"eval_accumulation_steps": null,
"eval_delay": 0,
"learning_rate": 6e-05,
"weight_decay": 0.01,
"adam_beta1": 0.9,
"adam_beta2": 0.999,
"adam_epsilon": 1e-08,
"max_grad_norm": 1.0,
"num_train_epochs": 5.0,
"max_steps": -1,
"lr_scheduler_type": "cosine_with_restarts",
"warmup_ratio": 0.06,
"warmup_steps": 0,
"log_level": "passive",
"log_level_replica": "warning",
"log_on_each_node": true,
"logging_dir": "ckpts/discrim/contrastive_models/gen.llama-7b/flan_t5_large_multiarith-clean_100K/runs/May20_03-24-27_leibniz",
"logging_strategy": "steps",
"logging_first_step": false,
"logging_steps": 50,
"logging_nan_inf_filter": true,
"save_strategy": "steps",
"save_steps": 1000,
"save_total_limit": 2,
"save_safetensors": false,
"save_on_each_node": false,
"no_cuda": false,
"use_mps_device": false,
"seed": 7,
"data_seed": null,
"jit_mode_eval": false,
"use_ipex": false,
"bf16": true,
"fp16": false,
"fp16_opt_level": "O1",
"half_precision_backend": "cuda_amp",
"bf16_full_eval": false,
"fp16_full_eval": false,
"tf32": null,
"local_rank": 0,
"xpu_backend": null,
"tpu_num_cores": null,
"tpu_metrics_debug": false,
"debug": [],
"dataloader_drop_last": false,
"eval_steps": 1000,
"dataloader_num_workers": 0,
"past_index": -1,
"run_name": "ckpts/discrim/contrastive_models/gen.llama-7b/flan_t5_large_multiarith-clean_100K/",
"disable_tqdm": false,
"remove_unused_columns": true,
"label_names": null,
"load_best_model_at_end": true,
"metric_for_best_model": "loss",
"greater_is_better": false,
"ignore_data_skip": false,
"sharded_ddp": [],
"fsdp": [],
"fsdp_min_num_params": 0,
"fsdp_config": {
"fsdp_min_num_params": 0,
"xla": false,
"xla_fsdp_grad_ckpt": false
},
"fsdp_transformer_layer_cls_to_wrap": null,
"deepspeed": null,
"label_smoothing_factor": 0.0,
"optim": "adamw_hf",
"optim_args": null,
"adafactor": false,
"group_by_length": false,
"length_column_name": "length",
"report_to": [
"wandb"
],
"ddp_find_unused_parameters": null,
"ddp_bucket_cap_mb": null,
"dataloader_pin_memory": true,
"skip_memory_metrics": true,
"use_legacy_prediction_loop": false,
"push_to_hub": false,
"resume_from_checkpoint": null,
"hub_model_id": null,
"hub_strategy": "every_save",
"hub_token": null,
"hub_private_repo": false,
"gradient_checkpointing": false,
"include_inputs_for_metrics": false,
"fp16_backend": "auto",
"push_to_hub_model_id": null,
"push_to_hub_organization": null,
"push_to_hub_token": null,
"_n_gpu": 1,
"mp_parameters": "",
"auto_find_batch_size": false,
"full_determinism": false,
"torchdynamo": null,
"ray_scope": "last",
"ddp_timeout": 1800,
"torch_compile": false,
"torch_compile_backend": null,
"torch_compile_mode": null,
"margin": 1.0,
"ckpt_dir": null,
"loss_type": "maxmargin",
"fix_tokenizer": false,
"dev_metric": "loss"
}