{
    "lora_name": "Written",
    "always_override": false,
    "q_proj_en": true,
    "v_proj_en": true,
    "k_proj_en": false,
    "o_proj_en": false,
    "gate_proj_en": false,
    "down_proj_en": false,
    "up_proj_en": false,
    "save_steps": 0.0,
    "micro_batch_size": 8,
    "batch_size": 256,
    "epochs": 15.0,
    "learning_rate": "3e-4",
    "lr_scheduler_type": "linear",
    "lora_rank": 128,
    "lora_alpha": 256,
    "lora_dropout": 0.05,
    "cutoff_len": 256,
    "dataset": "None",
    "eval_dataset": "None",
    "format": "None",
    "eval_steps": 100.0,
    "raw_text_file": "Writing Lora",
    "overlap_len": 128,
    "newline_favor_len": 128,
    "higher_rank_limit": false,
    "warmup_steps": 100.0,
    "optimizer": "adamw_torch",
    "hard_cut_string": "\\n\\n\\n",
    "train_only_after": "",
    "stop_at_loss": 0,
    "add_eos_token": false,
    "min_chars": 0.0,
    "report_to": "None"
}
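
For reference, a minimal sketch of how the LoRA-specific fields above could be expressed as a Hugging Face PEFT `LoraConfig`. This is an illustration, not the tool's own loader: the `peft` import and class are the standard library API, and the gradient-accumulation arithmetic assumes the common `batch_size / micro_batch_size` convention.

```python
# Illustrative mapping of the saved JSON fields onto a standard PEFT
# LoraConfig. Assumes the Hugging Face `peft` package is installed;
# this is not the original tool's code.
from peft import LoraConfig

# Only the projections flagged *_en: true in the config are adapted
# (q_proj, v_proj); k/o/gate/down/up projections are left frozen.
lora_config = LoraConfig(
    r=128,                                # "lora_rank"
    lora_alpha=256,                       # "lora_alpha" (2x rank)
    lora_dropout=0.05,                    # "lora_dropout"
    target_modules=["q_proj", "v_proj"],  # q_proj_en / v_proj_en
    bias="none",
    task_type="CAUSAL_LM",
)

# Effective gradient accumulation implied by the batch settings:
# 256 (batch_size) / 8 (micro_batch_size) = 32 accumulation steps.
gradient_accumulation_steps = 256 // 8
```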