MohamedAhmedAE committed
Commit e25e93a (1 parent: ecce33c)

Training in progress, step 3400, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1ffa24c6d65d6ebe046b12c298cf4ae161aa8f7a73a62399ba6434af84128daa
+oid sha256:88adee23a7e35b2cf4aebcd8c81faec7b932abea1a3d19cf7a8d36adbdbc169d
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ecd655e3d91e89f6e1fa258974b79bf0d215f8ace68f3d365b99110b5abe49ce
+oid sha256:73c08ba3cc41f312df74a4897f9862543590396f79d1f28fccf1aaa0e2d0a62e
 size 84581014
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4c5ce141a264dd8520f51f3b5490a7f5208fde47774153324df63d2668f9a7d7
+oid sha256:8533a46f2820d296dd8dd66497b3038b4c8cd3388b495350dff5037c2a4c3482
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0770650b062be7cee981b3da10cdf7010bb9457e5bd03d190388904fc6d3b7aa
+oid sha256:715607d045c48cb60eaa3afa895318c1f98eb549c11762cb3cadd29ed207b42b
 size 1064
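
Each of the checkpoint files above is stored as a Git LFS pointer (a version line, an oid sha256, and a byte size), so the diff only swaps the hash while the size stays the same. Below is a minimal sketch, using only the Python standard library, for checking that a locally downloaded file matches its pointer; the local path is an assumption about where the repository was cloned or the file was downloaded.

import hashlib

def verify_lfs_pointer(local_path, expected_sha256, expected_size):
    # Stream the file and compare its sha256 digest and byte size
    # against the oid/size recorded in the Git LFS pointer.
    digest = hashlib.sha256()
    size = 0
    with open(local_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_sha256 and size == expected_size

# New adapter weights from this commit (local path is an assumption):
print(verify_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "88adee23a7e35b2cf4aebcd8c81faec7b932abea1a3d19cf7a8d36adbdbc169d",
    167832240,
))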
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0023797217956488276,
+  "epoch": 0.0025284544078768793,
   "eval_steps": 2000,
-  "global_step": 3200,
+  "global_step": 3400,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -119,6 +119,13 @@
       "learning_rate": 1.9999988884312347e-05,
       "loss": 1.6221,
       "step": 3200
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 2.5657193660736084,
+      "learning_rate": 1.9999987447266877e-05,
+      "loss": 1.5533,
+      "step": 3400
     }
   ],
   "logging_steps": 200,
@@ -126,7 +133,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
-  "total_flos": 4.180089275793408e+16,
+  "total_flos": 4.438400445092659e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null