MohamedAhmedAE committed
Commit 4437f26
1 Parent(s): cc39208

Training in progress, step 23200

adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:de73c8ea52abf40027b7383f06bc5c25664759ed8d01ab6b657edbeadffba2ef
+ oid sha256:2e7f627d305f378c78e9f5383b187508dd29135634d00313e9ea84b8956b2c84
  size 167832240
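
All of the binary files in this commit are tracked with Git LFS, so the diff only rewrites the small pointer file: the oid sha256 line changes with the checkpoint contents, while the size stays at 167832240 bytes because the adapter still has the same number of parameters. A minimal sketch (assuming the actual file has been fetched, e.g. with git lfs pull) of checking a downloaded file against the oid recorded in its pointer:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file so a large checkpoint does not need to fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid taken from the new pointer in the diff above
expected = "2e7f627d305f378c78e9f5383b187508dd29135634d00313e9ea84b8956b2c84"
print(sha256_of("adapter_model.safetensors") == expected)
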
last-checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "gate_proj",
+ "q_proj",
  "up_proj",
  "v_proj",
  "down_proj",
- "k_proj",
- "q_proj",
- "o_proj"
+ "o_proj",
+ "gate_proj",
+ "k_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c9ee0d265f8e70084ca81fdacdf39e7f9917ac02e1ede7d255b8adf0a2aeefc3
+ oid sha256:20e56293ea86924617db1d72bddea10585e4be98a582ddca170c081dd3d642c9
  size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:167fb240eebf258d09f40184cf8ecb1a92b33eeedb04881336103f1db59d6557
+ oid sha256:3a7c5ec127b3617a18b3d72d14dceb0ba197088d23e289ef92a6837ce8b5e15f
  size 85736914
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cb17023e42a1975433cb9473b4b1010452af3b5b54c0534ea83365d644a81346
+ oid sha256:f4762419da8cab351088827e4869a7225442c8f1f7484efb21afc6c2799818eb
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:71a34daad23521f6056817d847b1bc44dcea2f8c4e6f41e8f160b1e661993d6f
+ oid sha256:47e872382e84dc07d00209e557644b4bf1503898fe7aa404af9c426170e40ad4
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.05131275121867784,
+ "epoch": 0.033316105139083584,
  "eval_steps": 2000,
- "global_step": 23000,
+ "global_step": 22400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -791,35 +791,14 @@
  "learning_rate": 1.9997813001808763e-05,
  "loss": 1.5603,
  "step": 22400
- },
- {
- "epoch": 0.05,
- "grad_norm": 3.027528762817383,
- "learning_rate": 1.9994991132809548e-05,
- "loss": 1.5228,
- "step": 22600
- },
- {
- "epoch": 0.05,
- "grad_norm": 1.9839845895767212,
- "learning_rate": 1.9994902463916502e-05,
- "loss": 1.5413,
- "step": 22800
- },
- {
- "epoch": 0.05,
- "grad_norm": 1.558858871459961,
- "learning_rate": 1.999481256582422e-05,
- "loss": 1.5713,
- "step": 23000
  }
  ],
  "logging_steps": 200,
- "max_steps": 2241155,
+ "max_steps": 3361735,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 200,
- "total_flos": 4.562049382247547e+17,
+ "total_flos": 4.327171237194056e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c8b8d5d55034cf9499e0ba365ba364383f0583a4c0942d2f91245e7ceb5c8746
+ oid sha256:9a7800ffb29813f89bbd542587a923598ba056a5186a554421f6b09a6d22b374
  size 4920
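
training_args.bin also gets a new oid at the same 4920-byte size; it holds the TrainingArguments object that transformers' Trainer saves with torch.save, so any changed argument (such as a recomputed max_steps) produces a new hash. A sketch for inspecting it; the weights_only handling is an assumption about a recent PyTorch environment, where torch.load defaults to weights_only=True and must be relaxed for a pickled object like this:

import torch
from transformers import TrainingArguments  # the pickle needs this class importable

# weights_only=False is needed on newer torch, since this file holds a pickled
# TrainingArguments object rather than plain tensors.
args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
assert isinstance(args, TrainingArguments)
print(args.max_steps, args.num_train_epochs, args.per_device_train_batch_size)
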