BobaZooba committed
Commit: f71f463
Parent: 0d499d9

Training in progress, step 300

adapter_config.json CHANGED
@@ -14,13 +14,13 @@
   "r": 64,
   "revision": null,
   "target_modules": [
-    "k_proj",
-    "down_proj",
     "o_proj",
-    "up_proj",
     "q_proj",
     "v_proj",
-    "gate_proj"
+    "k_proj",
+    "up_proj",
+    "gate_proj",
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
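This hunk is purely a reordering of the LoRA target modules: the same seven Llama-style projections (attention q/k/v/o plus the MLP gate/up/down) are targeted before and after the change. For reference, a minimal sketch of the equivalent PEFT configuration, assuming the peft library; lora_alpha and lora_dropout are illustrative placeholders, since those fields sit outside the visible hunk:

```python
from peft import LoraConfig

# Mirrors the updated adapter_config.json: rank-64 LoRA on every attention
# projection and every MLP projection of a causal LM.
lora_config = LoraConfig(
    r=64,
    lora_alpha=32,       # assumed; not visible in the hunk
    lora_dropout=0.05,   # assumed; not visible in the hunk
    target_modules=[
        "o_proj",
        "q_proj",
        "v_proj",
        "k_proj",
        "up_proj",
        "gate_proj",
        "down_proj",
    ],
    task_type="CAUSAL_LM",
)
```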
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:25e89b84367382ff8f0d486818a7e9a1c72e5409a56e029bc42bf1057d91069d
+oid sha256:3292b605913b97d05f8a16583a995dc94fc1588668a402e1784a8e0868dee3df
 size 335605144
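adapter_model.safetensors is stored through Git LFS, so the repository holds only the three-line pointer: spec version, sha256 oid, and byte size. The size is unchanged (335,605,144 bytes) and only the oid moves, i.e. the step-300 weights replace the previous checkpoint in place. A small sketch, assuming a locally materialized copy of the file, that verifies it against the pointer:

```python
import hashlib
import os

def matches_lfs_pointer(path: str, oid: str, size: int) -> bool:
    """True if the local file has the byte size and sha256 recorded
    in a git-lfs pointer file."""
    if os.path.getsize(path) != size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        # Stream in 1 MiB chunks rather than reading ~335 MB at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == oid

# Values taken from the new pointer above; the local filename is assumed.
ok = matches_lfs_pointer(
    "adapter_model.safetensors",
    oid="3292b605913b97d05f8a16583a995dc94fc1588668a402e1784a8e0868dee3df",
    size=335605144,
)
```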
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:97d67e28d055cab599d52c66d8f500f3e8a123b36c8a5fb3ca637e401660e9e8
+oid sha256:7eb64e89142d2edd045ef5062485db930543cdfb71826620065d3de56bb9bc95
 size 6264
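training_args.bin follows the same pattern: identical 6,264-byte size, new hash. This file is conventionally the pickled TrainingArguments object that transformers.Trainer writes next to each checkpoint. A hedged sketch for inspecting it, assuming torch and transformers are installed:

```python
import torch

# Conventionally a pickled transformers.TrainingArguments object.
# weights_only=False is needed on torch >= 2.6, where the restrictive
# weights-only unpickler became the default.
args = torch.load("training_args.bin", weights_only=False)

# After this commit these should mirror training_config.json.
print(args.eval_steps, args.save_steps)
```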
training_config.json CHANGED
@@ -2,7 +2,7 @@
   "experiment_key": "base",
   "save_safetensors": true,
   "max_shard_size": "10GB",
-  "local_rank": 1,
+  "local_rank": 0,
   "use_gradient_checkpointing": true,
   "trainer_key": "lm",
   "force_fp32": false,
@@ -64,7 +64,7 @@
   "gradient_accumulation_steps": 2,
   "eval_accumulation_steps": null,
   "eval_delay": 0,
-  "eval_steps": 1000,
+  "eval_steps": 300,
   "warmup_steps": 100,
   "max_steps": null,
   "num_train_epochs": 5,
@@ -73,7 +73,7 @@
   "weight_decay": 0.001,
   "label_smoothing_factor": 0.1,
   "logging_steps": 1,
-  "save_steps": 100,
+  "save_steps": 300,
   "save_total_limit": 0,
   "optim": "paged_adamw_8bit",
   "push_to_hub": true,