llama2-autotrain / training_params.json
{
  "model_name": "NousResearch/Llama-2-7b-hf",
  "data_path": "dplutchok/llama2-train100",
  "train_split": "train",
  "valid_split": null,
  "text_column": "text",
  "huggingface_token": null,
  "learning_rate": 0.0002,
  "num_train_epochs": 1,
  "train_batch_size": 2,
  "eval_batch_size": 4,
  "warmup_ratio": 0.1,
  "gradient_accumulation_steps": 1,
  "optimizer": "adamw_torch",
  "scheduler": "linear",
  "weight_decay": 0.0,
  "max_grad_norm": 1.0,
  "seed": 42,
  "add_eos_token": false,
  "block_size": 2048,
  "use_peft": true,
  "lora_r": 16,
  "lora_alpha": 32,
  "lora_dropout": 0.05,
  "training_type": "generic",
  "train_on_inputs": false,
  "logging_steps": -1,
  "project_name": "lama2-train10",
  "evaluation_strategy": "epoch",
  "save_total_limit": 1,
  "save_strategy": "epoch",
  "auto_find_batch_size": false,
  "fp16": false,
  "push_to_hub": true,
  "use_int8": false,
  "model_max_length": 2048,
  "repo_id": "dplutchok/llama2-autotrain",
  "use_int4": true,
  "trainer": "sft",
  "target_modules": null
}
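
For context, here is a minimal sketch of how these parameters map onto a QLoRA-style supervised fine-tune with `transformers`, `peft`, and `trl`. It is not the AutoTrain internals, just one plausible translation of the fields above; it assumes a CUDA GPU with `bitsandbytes` installed and an older `trl` release (circa 0.7) where `SFTTrainer` still accepts `dataset_text_field` and `max_seq_length` directly.

```python
import torch
from datasets import load_dataset
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    BitsAndBytesConfig,
    TrainingArguments,
)
from peft import LoraConfig
from trl import SFTTrainer

model_name = "NousResearch/Llama-2-7b-hf"

# "use_int4": true -> load the base model 4-bit quantized (QLoRA-style).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    quantization_config=bnb_config,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# "use_peft": true with lora_r / lora_alpha / lora_dropout.
# "target_modules": null -> let peft pick its Llama defaults.
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    task_type="CAUSAL_LM",
)

# Optimizer, scheduler, and batch settings copied from the JSON.
# "valid_split" is null, so no eval dataset or evaluation strategy here.
training_args = TrainingArguments(
    output_dir="llama2-autotrain",
    learning_rate=2e-4,
    num_train_epochs=1,
    per_device_train_batch_size=2,
    per_device_eval_batch_size=4,
    warmup_ratio=0.1,
    gradient_accumulation_steps=1,
    optim="adamw_torch",
    lr_scheduler_type="linear",
    weight_decay=0.0,
    max_grad_norm=1.0,
    seed=42,
    save_strategy="epoch",
    save_total_limit=1,
    # "push_to_hub": true with "repo_id" from the config.
    push_to_hub=True,
    hub_model_id="dplutchok/llama2-autotrain",
)

dataset = load_dataset("dplutchok/llama2-train100", split="train")

# "trainer": "sft" -> supervised fine-tuning on the "text" column,
# truncated to 2048 tokens per the "block_size" setting.
trainer = SFTTrainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    peft_config=peft_config,
    dataset_text_field="text",
    max_seq_length=2048,
    tokenizer=tokenizer,
)
trainer.train()
```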