{
  "model_name": "TheBloke/Llama-2-7B-fp16",
  "data_path": "tatsu-lab/alpaca",
  "train_split": "train",
  "valid_split": null,
  "text_column": "text",
  "huggingface_token": null,
  "learning_rate": 0.0002,
  "num_train_epochs": 3,
  "train_batch_size": 2,
  "eval_batch_size": 4,
  "warmup_ratio": 0.1,
  "gradient_accumulation_steps": 1,
  "optimizer": "adamw_torch",
  "scheduler": "linear",
  "weight_decay": 0.0,
  "max_grad_norm": 1.0,
  "seed": 42,
  "add_eos_token": false,
  "block_size": 2048,
  "use_peft": true,
  "lora_r": 16,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "training_type": "generic",
  "train_on_inputs": false,
  "logging_steps": -1,
"project_name": "Eigth Llama Train", | |
"evaluation_strategy": "epoch", | |
"save_total_limit": 1, | |
"save_strategy": "epoch", | |
"auto_find_batch_size": false, | |
"fp16": false, | |
"push_to_hub": true, | |
"use_int8": false, | |
"model_max_length": 2048, | |
"repo_id": "cminor102/testingnewmodel3", | |
"use_int4": true, | |
"trainer": "sft", | |
"target_modules": null | |
} |
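
For reference, here is a minimal sketch of how these fields map onto a Hugging Face fine-tuning run using `transformers`, `peft`, `bitsandbytes`, and `trl`. It assumes an older `trl` release in which `SFTTrainer` accepts `dataset_text_field` and `max_seq_length` directly; `output_dir` is a hypothetical local path, and the code illustrates the parameter mapping rather than AutoTrain's exact internals. Note that `valid_split` is null while `evaluation_strategy` is "epoch", so no evaluation dataset is wired up here.

```python
# Sketch only: maps the config above onto library arguments.
# Not the exact AutoTrain training loop.
from datasets import load_dataset
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
)
from trl import SFTTrainer

model_name = "TheBloke/Llama-2-7B-fp16"

# data_path / train_split; valid_split is null, so no eval set is loaded.
dataset = load_dataset("tatsu-lab/alpaca", split="train")

# use_int4: load the base model in 4-bit via bitsandbytes.
bnb_config = BitsAndBytesConfig(load_in_4bit=True)
model = AutoModelForCausalLM.from_pretrained(
    model_name, quantization_config=bnb_config, device_map="auto"
)
model = prepare_model_for_kbit_training(model)

tokenizer = AutoTokenizer.from_pretrained(model_name)

# use_peft / lora_r / lora_alpha / lora_dropout: attach LoRA adapters.
# target_modules is null in the config, so the library default for
# Llama-style models applies.
lora_config = LoraConfig(
    r=16, lora_alpha=32, lora_dropout=0.05, task_type="CAUSAL_LM"
)
model = get_peft_model(model, lora_config)

args = TrainingArguments(
    output_dir="testingnewmodel3",      # hypothetical local directory
    learning_rate=2e-4,
    num_train_epochs=3,
    per_device_train_batch_size=2,      # train_batch_size
    per_device_eval_batch_size=4,       # eval_batch_size
    warmup_ratio=0.1,
    gradient_accumulation_steps=1,
    optim="adamw_torch",                # optimizer
    lr_scheduler_type="linear",         # scheduler
    weight_decay=0.0,
    max_grad_norm=1.0,
    seed=42,
    save_strategy="epoch",
    save_total_limit=1,
    fp16=False,
    push_to_hub=True,
    hub_model_id="cminor102/testingnewmodel3",  # repo_id
)

trainer = SFTTrainer(
    model=model,
    args=args,
    train_dataset=dataset,
    dataset_text_field="text",  # text_column
    max_seq_length=2048,        # block_size / model_max_length
    tokenizer=tokenizer,
)
trainer.train()
```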