MohamedAhmedAE committed
Commit 198a8f0
1 parent: 7e36ebf

Training in progress, step 1800, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e031d128a56e1f2c0d13eb1bdf24b11e3161a5c84ee640ffe8148c5cde2e7779
+oid sha256:0d87d7b00b2cd87fc029fa649fcad601b3ab7944457ec983484f19f24a594882
 size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:68b7b1fbf5329eb8a91ba2dd7567fcfc9167143f8b5b1e8a308611118d62b893
+oid sha256:665a4d9783c05d43fa2505e60c3df4f8fdb4f033e3075cab2b640d9f0f0ef616
 size 84581014
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d51ba13d8ecce76d2ed0ab5874b7250d2ee372ca336a46f1c56ac6d02ec40073
+oid sha256:3d6334547612743196cb899c468f052098297738886954360e857941f54e2a3c
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8c9e9f1ae41fa1e5ee80b76c39d1d8aae5af530178bf78fd25d0ba3cffdc224c
+oid sha256:3a72ce24312e683508adae33679f3e646acad48e43aa6427ea385038cc671f18
 size 1064
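
The four files above are Git LFS pointers: each diff only swaps the sha256 OID that identifies the refreshed checkpoint artifact, while the payload sizes stay the same. As a hedged illustration (none of this code is part of the repository), the LoRA adapter stored in last-checkpoint/adapter_model.safetensors could be attached to its base model with peft roughly as follows; the base model name is an assumption, since this commit does not record it.

from transformers import AutoModelForCausalLM
from peft import PeftModel

BASE_MODEL = "base-model-name"      # assumption: the base model is not named in this commit
CHECKPOINT_DIR = "last-checkpoint"  # directory holding adapter_model.safetensors

base = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
model = PeftModel.from_pretrained(base, CHECKPOINT_DIR)  # loads the adapter weights on top of the base model
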
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0011898608978244138,
+  "epoch": 0.0013385935100524654,
   "eval_steps": 2000,
-  "global_step": 1600,
+  "global_step": 1800,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -63,6 +63,13 @@
       "learning_rate": 1.9999997233256404e-05,
       "loss": 1.6001,
       "step": 1600
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 3.046320676803589,
+      "learning_rate": 1.9999996494428805e-05,
+      "loss": 1.5682,
+      "step": 1800
     }
   ],
   "logging_steps": 200,
@@ -70,7 +77,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 200,
-  "total_flos": 2.07320002916352e+16,
+  "total_flos": 2.3336484713988096e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null