MohamedAhmedAE committed
Commit 2520d61
1 Parent(s): 57a4e58

Training in progress, step 1200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4cc0e03b9e41c36bebd5290c036c6ab8219fc845aed5c0fa6e9fcd25304615e4
+oid sha256:15df8bb7b0d650ace01d49121a6ff2801e53098ae1fbd955ed4336f0f8e11ee6
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6b25ff21c077ced998e166c13b16ca46c3928f2a8dec143c1e29895bcf0ed63e
+oid sha256:44804d51d5f6b390f04a3c04221e0560893966402950edc2362e929c63a832a3
 size 84581014
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:437c777924390da1dedc2bce8c9e0b6f4eb89d30ea814cea3a93ff7b5d5fae05
+oid sha256:42ae668d05ab3aad6ce208eabfc49a053f98a880118709786e12d55282ae6b22
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:89ee446e7e0c91089e5e6b7b626fa10c97dc177ed58fc09c887e3e9c3cd58b38
+oid sha256:9b474e3bac276f26ded4a53adf20938b4253ff5f990814d1b36777f7eb9de45a
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0007436630611402585,
+  "epoch": 0.0008923956733683103,
   "eval_steps": 2000,
-  "global_step": 1000,
+  "global_step": 1200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -42,6 +42,13 @@
       "learning_rate": 1.999999892574807e-05,
       "loss": 1.5775,
       "step": 1000
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 2.1446919441223145,
+      "learning_rate": 1.9999998448916044e-05,
+      "loss": 1.6922,
+      "step": 1200
     }
   ],
   "logging_steps": 200,
@@ -49,7 +56,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
-  "total_flos": 1.2689424120840192e+16,
+  "total_flos": 1.5519816481284096e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null