CodeStral-RAG-Lora / Codestral-22B_Lora / training_parameters.json
An early checkpoint ~ contextually obedient in answering. The training curriculum is currently at the stage of training for contextual refusals when the information provided is not sufficient to answer.
{
  "lora_name": "Contextual-Codestral",
  "always_override": true,
  "q_proj_en": true,
  "v_proj_en": true,
  "k_proj_en": true,
  "o_proj_en": true,
  "gate_proj_en": true,
  "down_proj_en": true,
  "up_proj_en": true,
  "save_steps": 0,
  "micro_batch_size": 42,
  "batch_size": 24,
  "epochs": 1.42,
  "learning_rate": "6e-7",
  "lr_scheduler_type": "polynomial",
  "lora_rank": 8,
  "lora_alpha": 16,
  "lora_dropout": 0.05,
  "cutoff_len": 8192,
  "dataset": "0_Contextual",
  "eval_dataset": "1_I_Cannot_Answer_That_For_You_From_Provided_Context",
  "format": "Mermaid-format",
  "eval_steps": 100,
  "raw_text_file": "None",
  "overlap_len": 128,
  "newline_favor_len": 128,
  "higher_rank_limit": false,
  "warmup_steps": 1,
  "optimizer": "adagrad",
  "hard_cut_string": "\\n\\n\\n",
  "train_only_after": "",
  "stop_at_loss": 0.2,
  "add_eos_token": true,
  "min_chars": 0,
  "report_to": "None"
}
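
For illustration, below is a minimal sketch of how the LoRA-specific fields in this JSON could be mapped onto a Hugging Face `peft.LoraConfig`. The file path, the `*_proj_en` -> module-name mapping, and the `bias`/`task_type` choices are assumptions for the example, not taken from the actual training script.

```python
import json

from peft import LoraConfig  # assumes the `peft` package is installed

# Load the saved training parameters (path is illustrative).
with open("Codestral-22B_Lora/training_parameters.json") as f:
    params = json.load(f)

# Collect the projection modules enabled via the *_proj_en flags.
# Dropping the "_en" suffix to get module names like "q_proj" is an
# assumption about how these flags correspond to the model's layers.
target_modules = [
    key.removesuffix("_en")
    for key, enabled in params.items()
    if key.endswith("_proj_en") and enabled
]

# Build a peft LoraConfig from the rank/alpha/dropout fields in the JSON.
lora_config = LoraConfig(
    r=params["lora_rank"],
    lora_alpha=params["lora_alpha"],
    lora_dropout=params["lora_dropout"],
    target_modules=target_modules,
    bias="none",
    task_type="CAUSAL_LM",
)

print(target_modules)                   # e.g. ['q_proj', 'v_proj', 'k_proj', ...]
print(float(params["learning_rate"]))   # stored as a string ("6e-7"), so cast before use
```

Note that `learning_rate` is stored as a string in this config, so a consumer would need to convert it to a float before passing it to an optimizer.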