MohamedAhmedAE committed on
Commit c0f27e7
1 Parent(s): 247481d

Training in progress, step 2000, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0d87d7b00b2cd87fc029fa649fcad601b3ab7944457ec983484f19f24a594882
+ oid sha256:ab0d2cef0a520f95d1b3a5e19c4faf1f2fdd4b669b292c657819c3293f9777df
  size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:665a4d9783c05d43fa2505e60c3df4f8fdb4f033e3075cab2b640d9f0f0ef616
+ oid sha256:6e19bb13e6ddd38b55d269b81836df099f52d521292cc3147c8069c577d0d7f7
  size 84581014
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3d6334547612743196cb899c468f052098297738886954360e857941f54e2a3c
+ oid sha256:473e97fa4a79ae2cdbec269f5dd726507565e4476964fba62228ef7aefe54818
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3a72ce24312e683508adae33679f3e646acad48e43aa6427ea385038cc671f18
+ oid sha256:0a564ad02175d1fc4a411742de225c89f8307036c81f34a693a28057e5f5ff71
  size 1064
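
The four files above are Git LFS pointer files: only the pointer metadata (spec version, sha256 oid, byte size) is versioned, while the checkpoint payload itself lives in LFS storage. As a minimal sketch, one might verify that a downloaded checkpoint file matches the oid recorded in its pointer; the local path below is an assumption based on the repository layout, and the expected digest is taken from the new adapter_model.safetensors pointer above.

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its hex SHA-256 digest."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the updated LFS pointer in this commit.
expected = "ab0d2cef0a520f95d1b3a5e19c4faf1f2fdd4b669b292c657819c3293f9777df"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")  # assumed local path
print("match" if actual == expected else f"mismatch: {actual}")
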
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.0013385935100524654,
+ "epoch": 0.001487326122280517,
  "eval_steps": 2000,
- "global_step": 1800,
+ "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -70,6 +70,13 @@
  "learning_rate": 1.9999996494428805e-05,
  "loss": 1.5682,
  "step": 1800
+ },
+ {
+ "epoch": 0.0,
+ "grad_norm": 4.574249267578125,
+ "learning_rate": 1.9999995668269356e-05,
+ "loss": 1.5658,
+ "step": 2000
  }
  ],
  "logging_steps": 200,
@@ -77,7 +84,7 @@
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 200,
- "total_flos": 2.3336484713988096e+16,
+ "total_flos": 2.6047198846255104e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null