Hanzalwi committed on
Commit 87a0d4d
1 Parent(s): a8b18bb

Training in progress, step 200, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:87cf31743e614965a50fedc4eb7254e261cc69cf7bdc4193d81cae3d82da8ffd
+oid sha256:e02732510269342d14c62181c1280a9f610eb3992a7342492e6255d8d2d07c9b
 size 25191576
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:784552a53470b749bfb0fdb382aa1fba33ed8ed2166d99b25d3b23f8502eacfe
+oid sha256:22ae8fcda2dea592fb87b1362f8af5d8bd1eda662f2df2118ec2b901891d7c63
 size 50444613
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1399d80939b4d27a777d1f87cb586e5a9acf0b7d01a6d85fd06934d47d7efe99
+oid sha256:92f35b713645b2f6fb9d57649d0c8c63bb1e7812311062435203302e19fb74ba
 size 14575
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:216f76b8039f833c337db298c81f13b12082d5fd4f9d866cecd34b2ca7550b37
+oid sha256:7aa2c8b84e17817e6a4dcba5955fca913e266fdcd47f5594a29933ebd4972a01
 size 627
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 1.0747358798980713,
-  "best_model_checkpoint": "./outputs/checkpoint-100",
-  "epoch": 0.13333333333333333,
+  "best_metric": 1.0527141094207764,
+  "best_model_checkpoint": "./outputs/checkpoint-200",
+  "epoch": 0.26666666666666666,
   "eval_steps": 100,
-  "global_step": 100,
+  "global_step": 200,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -21,6 +21,20 @@
       "eval_samples_per_second": 5.445,
       "eval_steps_per_second": 0.683,
       "step": 100
+    },
+    {
+      "epoch": 0.27,
+      "learning_rate": 0.0002,
+      "loss": 0.9173,
+      "step": 200
+    },
+    {
+      "epoch": 0.27,
+      "eval_loss": 1.0527141094207764,
+      "eval_runtime": 353.1278,
+      "eval_samples_per_second": 5.465,
+      "eval_steps_per_second": 0.685,
+      "step": 200
     }
   ],
   "logging_steps": 100,
@@ -28,7 +42,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 3,
   "save_steps": 100,
-  "total_flos": 2.8285532237070336e+16,
+  "total_flos": 5.648480752523674e+16,
   "trial_name": null,
   "trial_params": null
 }