infogep committed
Commit 8542afe · verified · 1 Parent(s): 2a09c90

Training in progress, step 100, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b165c57288b3a1cb87c459ca9217ab265550f0d9a0be4b03e47aea610cfe11da
+oid sha256:349b92fc771467ba19f4a4375705b0be18ee0a70d66dd5e9a3218efead140655
 size 891333320
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e8b527d3eca9fefe98f40ef8916f8d843e468b9d5861e46cd138601924c8cc76
+oid sha256:25b655e6762cc1be326580ea77e8423738abff3ab79f8450863570718360a4d6
 size 452984596
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eb6c31fdd8582b45a066dc89e3f99ddd96bde609925f8b3ac71fe97a5f9c5e25
+oid sha256:100d6b5df23f5f5fd45227dfa104aadadc2938f6706932b75edbcb0c200c13d0
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:152cf5e1f9adf3dc2c608dbb3e394e09940ab42688cdb5a07d89f1cccb7f89a5
+oid sha256:d4d0f57336dd58c2282a758b1873df2644647c71e8296b0dab58cb3a9f5f7c78
 size 1064
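
The four files above are Git LFS pointers, so each hunk only swaps the sha256 object ID while the byte size stays the same. The sketch below is a minimal, hypothetical way to confirm that a locally downloaded copy matches the new pointer; the path and expected digest are taken from the adapter_model.safetensors hunk above and would need adjusting for the other files.

import hashlib
from pathlib import Path

# Assumed local path (e.g. after `git lfs pull` or a hub download) and the
# expected oid copied from the pointer diff above.
CHECKPOINT = Path("last-checkpoint/adapter_model.safetensors")
EXPECTED_OID = "349b92fc771467ba19f4a4375705b0be18ee0a70d66dd5e9a3218efead140655"

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so large checkpoints fit in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

actual = sha256_of(CHECKPOINT)
print("size :", CHECKPOINT.stat().st_size)  # 891333320 per the pointer
print("oid  :", actual)
print("match:", actual == EXPECTED_OID)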
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.3859279751777649,
-  "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.04088307440719542,
+  "best_metric": 0.32411646842956543,
+  "best_model_checkpoint": "miner_id_24/checkpoint-100",
+  "epoch": 0.08176614881439084,
   "eval_steps": 50,
-  "global_step": 50,
+  "global_step": 100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -58,6 +58,49 @@
       "eval_samples_per_second": 4.128,
       "eval_steps_per_second": 1.034,
       "step": 50
+    },
+    {
+      "epoch": 0.04905968928863451,
+      "grad_norm": 0.28594130277633667,
+      "learning_rate": 6.992307692307692e-06,
+      "loss": 0.4319,
+      "step": 60
+    },
+    {
+      "epoch": 0.05723630417007359,
+      "grad_norm": 0.6387773156166077,
+      "learning_rate": 6.215384615384615e-06,
+      "loss": 0.4787,
+      "step": 70
+    },
+    {
+      "epoch": 0.06541291905151267,
+      "grad_norm": 0.30498838424682617,
+      "learning_rate": 5.438461538461538e-06,
+      "loss": 0.5047,
+      "step": 80
+    },
+    {
+      "epoch": 0.07358953393295176,
+      "grad_norm": 0.15410414338111877,
+      "learning_rate": 4.661538461538462e-06,
+      "loss": 0.1164,
+      "step": 90
+    },
+    {
+      "epoch": 0.08176614881439084,
+      "grad_norm": 0.12575368583202362,
+      "learning_rate": 3.884615384615385e-06,
+      "loss": 0.0136,
+      "step": 100
+    },
+    {
+      "epoch": 0.08176614881439084,
+      "eval_loss": 0.32411646842956543,
+      "eval_runtime": 124.5506,
+      "eval_samples_per_second": 4.135,
+      "eval_steps_per_second": 1.036,
+      "step": 100
     }
   ],
   "logging_steps": 10,
@@ -86,7 +129,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 6.27041523204096e+16,
+  "total_flos": 1.247192480219136e+17,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null