sakura-3b / arguments.json
{
  "model_name_or_path": "cerebras/btlm-3b-8k-base",
  "max_length": 8092,
  "trust_remote_code": true,
  "train_datasets": [
    [
      "bt",
      {
        "proportion": 1.0
      }
    ]
  ],
  "eval_datasets": null,
  "epochs": 16,
  "per_device_train_batch_size": 8,
  "per_device_eval_batch_size": 2,
  "gradient_accumulation_steps": 1,
  "gradient_checkpointing": true,
  "learning_rate": 4.7e-06,
  "lr_scheduler_type": "cosine",
  "num_warmup_steps": 20,
  "weight_decay": 0.0,
  "seed": 42,
  "fp16": false,
  "bf16": true,
  "tf32": true,
  "eval_strategy": "epoch",
  "eval_interval": 1000000,
  "need_eval": false,
  "eval_split_ratio": null,
  "output_dir": "/home/paperspace/safe-rlhf/output/sft",
  "log_type": "wandb",
  "log_dir": "/home/paperspace/safe-rlhf/output/sft",
  "log_project": "BT-Training",
  "log_run_name": "sft-2023-07-25-19-40-13",
  "save_16bit": false,
  "save_interval": 1000000,
  "local_rank": 0,
  "zero_stage": 2,
  "deepspeed": false,
  "deepspeed_config": null,
  "deepscale": false,
  "deepscale_config": null,
  "deepspeed_mpi": false,
  "global_rank": 0,
  "device": {
    "type": "torch.device",
    "repr": "device(type='cuda', index=0)"
  },
  "num_update_steps_per_epoch": 55,
  "total_training_steps": 880
}
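
The step counts at the bottom of the file are self-consistent: with 55 update steps per epoch over 16 epochs, the run performs 55 × 16 = 880 optimizer steps. A minimal Python sketch of that arithmetic (the `arguments.json` file name and the dataset-size back-calculation are assumptions for illustration, not part of the original config):

```python
import json

# Load the arguments shown above; the file name is an assumption.
with open("arguments.json") as f:
    args = json.load(f)

# Effective batch size per optimizer step on this single-GPU run
# (global_rank 0, DeepSpeed disabled): 8 * 1 = 8.
effective_batch = (
    args["per_device_train_batch_size"] * args["gradient_accumulation_steps"]
)

# 55 steps per epoch at an effective batch of 8 implies a training
# split of roughly 55 * 8 = 440 examples (an inference, not logged).
approx_examples = args["num_update_steps_per_epoch"] * effective_batch

# Total optimizer steps = steps per epoch * epochs = 55 * 16 = 880.
total_steps = args["num_update_steps_per_epoch"] * args["epochs"]
assert total_steps == args["total_training_steps"]

print(f"~{approx_examples} examples, {total_steps} total steps")
```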
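
The run pairs a cosine learning-rate schedule (`lr_scheduler_type`) with 20 warmup steps over those 880 total steps, starting from a peak learning rate of 4.7e-06. A sketch of how such a schedule is commonly built with Hugging Face `transformers`' `get_cosine_schedule_with_warmup` (assuming that helper; the safe-rlhf trainer may construct its scheduler differently):

```python
import torch
from transformers import get_cosine_schedule_with_warmup

# A dummy parameter so the optimizer has something to schedule.
param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.AdamW(
    [param],
    lr=4.7e-06,        # learning_rate from the config
    weight_decay=0.0,  # weight_decay from the config
)

# Linear warmup for 20 steps, then cosine decay to zero over the
# remainder, matching num_warmup_steps / total_training_steps.
scheduler = get_cosine_schedule_with_warmup(
    optimizer,
    num_warmup_steps=20,
    num_training_steps=880,
)

for _ in range(880):
    optimizer.step()
    scheduler.step()
print(scheduler.get_last_lr())  # ~0.0 after the full cosine decay
```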
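
The precision flags disable fp16 and enable bf16 and tf32. A hedged sketch of how those flags typically map onto PyTorch settings (an assumed mapping; the safe-rlhf trainer may wire this differently):

```python
import torch

# tf32: allow TensorFloat-32 matmuls/convolutions on Ampere+ GPUs.
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True

# bf16: run the forward pass under bfloat16 autocast on cuda:0,
# the device recorded at the bottom of the config.
device = torch.device("cuda", 0)
layer = torch.nn.Linear(8, 8, device=device)
x = torch.randn(2, 8, device=device)
with torch.autocast(device_type="cuda", dtype=torch.bfloat16):
    y = layer(x)
print(y.dtype)  # torch.bfloat16
```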