{
"initial_model_dir": "models/llama-30b",
"distribution_id": "math_hard",
"date_trained": "11/10/2023 08:18:27",
"output_dir": "models/classify_lora/llama-30b-math_hard",
"overwrite_output_dir": false,
"do_train": false,
"do_eval": true,
"do_predict": false,
"evaluation_strategy": "steps",
"prediction_loss_only": false,
"per_device_train_batch_size": 8,
"per_device_eval_batch_size": 8,
"per_gpu_train_batch_size": null,
"per_gpu_eval_batch_size": null,
"gradient_accumulation_steps": 1,
"eval_accumulation_steps": null,
"eval_delay": 0,
"learning_rate": 0.0002,
"weight_decay": 0.0,
"adam_beta1": 0.9,
"adam_beta2": 0.999,
"adam_epsilon": 1e-08,
"max_grad_norm": 0.3,
"num_train_epochs": 2,
"max_steps": 100,
"lr_scheduler_type": "constant",
"warmup_ratio": 0.03,
"warmup_steps": 0,
"log_level": "passive",
"log_level_replica": "warning",
"log_on_each_node": true,
"logging_dir": "models/classify_lora/llama-30b-math_hard/runs/Oct11_05-49-08_compute-permanent-node-975",
"logging_strategy": "steps",
"logging_first_step": false,
"logging_steps": 1,
"logging_nan_inf_filter": true,
"save_strategy": "steps",
"save_steps": 25,
"save_total_limit": 0,
"save_safetensors": false,
"save_on_each_node": false,
"no_cuda": false,
"use_cpu": false,
"use_mps_device": false,
"seed": 42,
"data_seed": null,
"jit_mode_eval": false,
"use_ipex": false,
"bf16": false,
"fp16": false,
"fp16_opt_level": "O1",
"half_precision_backend": "auto",
"bf16_full_eval": false,
"fp16_full_eval": false,
"tf32": null,
"local_rank": 0,
"ddp_backend": null,
"tpu_num_cores": null,
"tpu_metrics_debug": false,
"debug": [],
"dataloader_drop_last": false,
"eval_steps": 25,
"dataloader_num_workers": 0,
"past_index": -1,
"run_name": "train|models-classify_lora-llama-30b-math_hard",
"disable_tqdm": false,
"remove_unused_columns": false,
"label_names": null,
"load_best_model_at_end": false,
"metric_for_best_model": "eval_math_hard_score",
"greater_is_better": true,
"ignore_data_skip": false,
"sharded_ddp": [],
"fsdp": [],
"fsdp_min_num_params": 0,
"fsdp_config": {
"min_num_params": 0,
"xla": false,
"xla_fsdp_grad_ckpt": false
},
"fsdp_transformer_layer_cls_to_wrap": null,
"deepspeed": "configs/ds_zero_1.json",
"label_smoothing_factor": 0.0,
"optim": "paged_adamw_32bit",
"optim_args": null,
"adafactor": false,
"group_by_length": false,
"length_column_name": "length",
"report_to": [
"wandb"
],
"ddp_find_unused_parameters": false,
"ddp_bucket_cap_mb": null,
"ddp_broadcast_buffers": null,
"dataloader_pin_memory": true,
"skip_memory_metrics": true,
"use_legacy_prediction_loop": false,
"push_to_hub": false,
"resume_from_checkpoint": null,
"hub_model_id": null,
"hub_strategy": "every_save",
"hub_token": null,
"hub_private_repo": false,
"hub_always_push": false,
"gradient_checkpointing": false,
"include_inputs_for_metrics": false,
"fp16_backend": "auto",
"push_to_hub_model_id": null,
"push_to_hub_organization": null,
"push_to_hub_token": null,
"_n_gpu": 1,
"mp_parameters": "",
"auto_find_batch_size": false,
"full_determinism": false,
"torchdynamo": null,
"ray_scope": "last",
"ddp_timeout": 1800,
"torch_compile": false,
"torch_compile_backend": null,
"torch_compile_mode": null,
"dispatch_batches": null
}