{
    "adapter_name": "testadapter2",
    "always_override": true,
    "save_steps": 0.0,
    "micro_batch_size": 1,
    "batch_size": 0,
    "epochs": 1.0,
    "learning_rate": "1e-2",
    "lr_scheduler_type": "linear",
    "cutoff_len": 512,
    "dataset": "None",
    "eval_dataset": "None",
    "format": "None",
    "eval_steps": 100.0,
    "raw_text_file": "Adastra2",
    "warmup_steps": 64.0,
    "optimizer": "adafactor",
    "hard_cut_string": "\\n\\n\\n",
    "train_only_after": "",
    "stop_at_loss": 0,
    "add_eos_token": true,
    "min_chars": 0.0,
    "report_to": "None",
    "precize_slicing_overlap": true,
    "add_eos_token_type": "Every Block",
    "save_steps_under_loss": 1.8,
    "add_bos_token": true,
    "training_projection": "all",
    "sliding_window": true,
    "warmup_ratio": 0,
    "grad_accumulation": 8,
    "lora_rank": 32,
    "lora_alpha": 64,
    "lora_dropout": 0.05,
    "higher_rank_limit": false,
    "adalora_rank": 32,
    "adalora_alpha": 64,
    "adalora_dropout": 0.05,
    "adalora_higher_rank_limit": false,
    "adapter_type": "ia3"
}
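
For reference, a minimal Python sketch that loads and sanity-checks these parameters. The filename "training_parameters.json" is an assumption (use whatever path the training tab saved the file to); note that "learning_rate" is stored as a string in this file and needs an explicit conversion, while most other numeric fields are already JSON numbers.

    import json

    # Path is an assumption; point this at the saved parameters file.
    with open("training_parameters.json") as f:
        params = json.load(f)

    # "learning_rate" is a string ("1e-2") in the saved JSON, so convert it.
    learning_rate = float(params["learning_rate"])

    # Effective batch size = micro_batch_size * grad_accumulation (1 * 8 here).
    effective_batch = params["micro_batch_size"] * params["grad_accumulation"]

    print(f"adapter={params['adapter_name']} type={params['adapter_type']}")
    print(f"lr={learning_rate} scheduler={params['lr_scheduler_type']}")
    print(f"rank={params['lora_rank']} alpha={params['lora_alpha']} "
          f"effective_batch={effective_batch}")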