MohamedAhmedAE committed on
Commit 88abad7
1 Parent(s): ba2f649

Training in progress, step 28600

adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3e0898b8f5fa3a127716bceae6fe688a1ff30ea05ce1eed02a57a906da398fa3
+oid sha256:58e49d067be4a31bbffaaed3adab728281e55bc38b07f3af94a80448a24f06fc
 size 167832240
last-checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
+    "k_proj",
+    "down_proj",
     "up_proj",
+    "v_proj",
     "q_proj",
-    "gate_proj",
-    "k_proj",
     "o_proj",
-    "v_proj",
-    "down_proj"
+    "gate_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3e0898b8f5fa3a127716bceae6fe688a1ff30ea05ce1eed02a57a906da398fa3
+oid sha256:781ebd998a71a8a8dd274340aa1124e66d3e00c0fe5093f9222bc4cbcd86aea4
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b8c120ea76368df90f3c378a34062125bdbe4c5874ea8ffe15f0ecfbcefb564f
+oid sha256:25f27f39f3f676a65d1a0f1adedcb02f1c57c226e2ebe8e8d1fa52b1fce80bd5
 size 85736914
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:92f6510343ddd74a9e520990c2a4ef079d80a3779f8f0b27f68a83c61c43be96
+oid sha256:9677b30ce0049dd05e6c506abb4b29c5c6375c70f6e47b50d2834249967a3544
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:794ba8d2b75eeb37bc392c89344ad479346f0d00859df5a0629b6f31f5ee82c1
+oid sha256:72cff663de036e0467d57060700e3479f0afb43e1a6ff1b8ccc5306bd9e04e29
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.16896024749106675,
+  "epoch": 0.3188827206169429,
   "eval_steps": 2000,
-  "global_step": 28400,
+  "global_step": 26800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -945,66 +945,10 @@
       "learning_rate": 1.980030022063824e-05,
       "loss": 1.5308,
       "step": 26800
-    },
-    {
-      "epoch": 0.16063122120629586,
-      "grad_norm": 1.242712140083313,
-      "learning_rate": 1.9949200564244935e-05,
-      "loss": 1.527,
-      "step": 27000
-    },
-    {
-      "epoch": 0.16182108210412027,
-      "grad_norm": 1.236722707748413,
-      "learning_rate": 1.9948445168438075e-05,
-      "loss": 1.5205,
-      "step": 27200
-    },
-    {
-      "epoch": 0.16301094300194469,
-      "grad_norm": 1.1174817085266113,
-      "learning_rate": 1.9947684212090804e-05,
-      "loss": 1.5316,
-      "step": 27400
-    },
-    {
-      "epoch": 0.1642008038997691,
-      "grad_norm": 1.0598382949829102,
-      "learning_rate": 1.9946917695628444e-05,
-      "loss": 1.5349,
-      "step": 27600
-    },
-    {
-      "epoch": 0.1653906647975935,
-      "grad_norm": 0.8770107626914978,
-      "learning_rate": 1.9946145619479428e-05,
-      "loss": 1.5308,
-      "step": 27800
-    },
-    {
-      "epoch": 0.16658052569541792,
-      "grad_norm": 1.3964169025421143,
-      "learning_rate": 1.9945367984075302e-05,
-      "loss": 1.5313,
-      "step": 28000
-    },
-    {
-      "epoch": 0.16777038659324234,
-      "grad_norm": 0.9861007928848267,
-      "learning_rate": 1.9944584789850707e-05,
-      "loss": 1.5065,
-      "step": 28200
-    },
-    {
-      "epoch": 0.16896024749106675,
-      "grad_norm": 1.196914792060852,
-      "learning_rate": 1.9943796037243405e-05,
-      "loss": 1.5059,
-      "step": 28400
     }
   ],
   "logging_steps": 200,
-  "max_steps": 840430,
+  "max_steps": 420215,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
@@ -1020,7 +964,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 9.739318665690563e+17,
+  "total_flos": 8.07870830933975e+17,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:16a9609226cfd3157635ce680f272eabc60b581c1e5a072a567443ee41ec1194
+oid sha256:99da71cc1761188ab44f1fc840fa4a8d321bbf1910892d2eb8c5958220e9679a
 size 5112