stillerman committed on
Commit
de1f2a4
1 Parent(s): edf61a1

commit files to HF hub

Files changed (5)
  1. README.md +23 -6
  2. all_results.json +12 -6
  3. eval_results.json +7 -6
  4. train_results.json +8 -0
  5. trainer_state.json +193 -0
README.md CHANGED
@@ -2,22 +2,35 @@
  license: apache-2.0
  tags:
  - generated_from_trainer
+ datasets:
+ - eli5
  metrics:
  - accuracy
  model-index:
- - name: jason-expert-eli5-0.5k-same-ds
-   results: []
+ - name: layer_9,10,11,12,13
+   results:
+   - task:
+       type: text-generation
+       name: Causal Language Modeling
+     dataset:
+       name: eli5
+       type: eli5
+       split: None
+     metrics:
+     - type: accuracy
+       value: 0.49515362035225047
+       name: Accuracy
  ---

  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
  should probably proofread and complete it, then remove this comment. -->

- # jason-expert-eli5-0.5k-same-ds
+ # layer_9,10,11,12,13

- This model is a fine-tuned version of [EleutherAI/pythia-1b-deduped](https://huggingface.co/EleutherAI/pythia-1b-deduped) on the None dataset.
+ This model is a fine-tuned version of [EleutherAI/pythia-1b-deduped](https://huggingface.co/EleutherAI/pythia-1b-deduped) on the eli5 dataset.
  It achieves the following results on the evaluation set:
- - Loss: 2.7820
- - Accuracy: 0.4294
+ - Loss: 2.4496
+ - Accuracy: 0.4952

  ## Model description

@@ -63,3 +76,7 @@ The following hyperparameters were used during training:
  - Pytorch 2.0.1+rocm5.4.2
  - Datasets 2.11.0
  - Tokenizers 0.13.3
+
+
+ ## Wandb Report
+ https://wandb.ai/ontocord/jason-test-pythia-1b-deduped-layer-test-eli5/runs/eecbe79c
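Note: the updated card describes a causal-LM fine-tune of EleutherAI/pythia-1b-deduped on eli5. As a minimal usage sketch, assuming the checkpoint is published as a standard transformers model, it could be loaded as below; the repo id is a hypothetical placeholder, since the commit does not state the final repository name.

```python
# Minimal sketch of loading and sampling from the fine-tuned checkpoint.
# "your-org/pythia-1b-deduped-eli5" is a hypothetical placeholder repo id.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "your-org/pythia-1b-deduped-eli5"  # placeholder, not the real repo name
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

prompt = "Explain like I'm five: why is the sky blue?"
inputs = tokenizer(prompt, return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=64, do_sample=True, top_p=0.9)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```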
all_results.json CHANGED
@@ -1,9 +1,15 @@
  {
- "eval_accuracy": 0.06482681017612524,
- "eval_loss": 5.38467264175415,
- "eval_runtime": 24.788,
+ "epoch": 14.81,
+ "eval_accuracy": 0.49515362035225047,
+ "eval_loss": 2.449622631072998,
+ "eval_runtime": 20.7527,
  "eval_samples": 2000,
- "eval_samples_per_second": 80.684,
- "eval_steps_per_second": 1.291,
- "perplexity": 218.03871565182433
+ "eval_samples_per_second": 96.373,
+ "eval_steps_per_second": 1.542,
+ "perplexity": 11.58397446228507,
+ "train_loss": 3.714359001159668,
+ "train_runtime": 485.1916,
+ "train_samples": 2153,
+ "train_samples_per_second": 65.953,
+ "train_steps_per_second": 1.031
  }
eval_results.json CHANGED
@@ -1,9 +1,10 @@
  {
- "eval_accuracy": 0.06482681017612524,
- "eval_loss": 5.38467264175415,
- "eval_runtime": 24.788,
+ "epoch": 14.81,
+ "eval_accuracy": 0.49515362035225047,
+ "eval_loss": 2.449622631072998,
+ "eval_runtime": 20.7527,
  "eval_samples": 2000,
- "eval_samples_per_second": 80.684,
- "eval_steps_per_second": 1.291,
- "perplexity": 218.03871565182433
+ "eval_samples_per_second": 96.373,
+ "eval_steps_per_second": 1.542,
+ "perplexity": 11.58397446228507
  }
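Note: the perplexity fields in all_results.json and eval_results.json are consistent with perplexity = exp(eval_loss): exp(5.3847) ≈ 218.04 for the old values and exp(2.4496) ≈ 11.584 for the new ones. A quick sanity check:

```python
# Sanity check: the reported perplexity equals exp(eval_loss) for both
# the old and the new evaluation results quoted in this commit.
import math

for eval_loss, reported in [(5.38467264175415, 218.03871565182433),
                            (2.449622631072998, 11.58397446228507)]:
    assert math.isclose(math.exp(eval_loss), reported, rel_tol=1e-6)
```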
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 14.81,
+ "train_loss": 3.714359001159668,
+ "train_runtime": 485.1916,
+ "train_samples": 2153,
+ "train_samples_per_second": 65.953,
+ "train_steps_per_second": 1.031
+ }
trainer_state.json ADDED
@@ -0,0 +1,193 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 14.814814814814815,
+ "global_step": 500,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.59,
+ "learning_rate": 9.6e-05,
+ "loss": 5.0937,
+ "step": 20
+ },
+ {
+ "epoch": 1.19,
+ "learning_rate": 9.200000000000001e-05,
+ "loss": 4.8773,
+ "step": 40
+ },
+ {
+ "epoch": 1.78,
+ "learning_rate": 8.800000000000001e-05,
+ "loss": 4.7701,
+ "step": 60
+ },
+ {
+ "epoch": 2.37,
+ "learning_rate": 8.4e-05,
+ "loss": 4.6992,
+ "step": 80
+ },
+ {
+ "epoch": 2.96,
+ "learning_rate": 8e-05,
+ "loss": 4.6032,
+ "step": 100
+ },
+ {
+ "epoch": 3.56,
+ "learning_rate": 7.6e-05,
+ "loss": 4.5531,
+ "step": 120
+ },
+ {
+ "epoch": 4.15,
+ "learning_rate": 7.2e-05,
+ "loss": 4.5023,
+ "step": 140
+ },
+ {
+ "epoch": 4.74,
+ "learning_rate": 6.800000000000001e-05,
+ "loss": 4.4622,
+ "step": 160
+ },
+ {
+ "epoch": 5.33,
+ "learning_rate": 6.400000000000001e-05,
+ "loss": 4.3641,
+ "step": 180
+ },
+ {
+ "epoch": 5.93,
+ "learning_rate": 6e-05,
+ "loss": 4.3186,
+ "step": 200
+ },
+ {
+ "epoch": 5.93,
+ "eval_accuracy": 0.13991878669275928,
+ "eval_loss": 4.384984493255615,
+ "eval_runtime": 20.6329,
+ "eval_samples_per_second": 96.933,
+ "eval_steps_per_second": 1.551,
+ "step": 200
+ },
+ {
+ "epoch": 6.52,
+ "learning_rate": 5.6000000000000006e-05,
+ "loss": 4.1148,
+ "step": 220
+ },
+ {
+ "epoch": 7.11,
+ "learning_rate": 5.2000000000000004e-05,
+ "loss": 4.0925,
+ "step": 240
+ },
+ {
+ "epoch": 7.7,
+ "learning_rate": 4.8e-05,
+ "loss": 3.8203,
+ "step": 260
+ },
+ {
+ "epoch": 8.3,
+ "learning_rate": 4.4000000000000006e-05,
+ "loss": 3.6945,
+ "step": 280
+ },
+ {
+ "epoch": 8.89,
+ "learning_rate": 4e-05,
+ "loss": 3.5112,
+ "step": 300
+ },
+ {
+ "epoch": 9.48,
+ "learning_rate": 3.6e-05,
+ "loss": 3.3269,
+ "step": 320
+ },
+ {
+ "epoch": 10.07,
+ "learning_rate": 3.2000000000000005e-05,
+ "loss": 3.2166,
+ "step": 340
+ },
+ {
+ "epoch": 10.67,
+ "learning_rate": 2.8000000000000003e-05,
+ "loss": 2.9733,
+ "step": 360
+ },
+ {
+ "epoch": 11.26,
+ "learning_rate": 2.4e-05,
+ "loss": 2.8717,
+ "step": 380
+ },
+ {
+ "epoch": 11.85,
+ "learning_rate": 2e-05,
+ "loss": 2.7653,
+ "step": 400
+ },
+ {
+ "epoch": 11.85,
+ "eval_accuracy": 0.4293894324853229,
+ "eval_loss": 2.7819876670837402,
+ "eval_runtime": 22.9408,
+ "eval_samples_per_second": 87.181,
+ "eval_steps_per_second": 1.395,
+ "step": 400
+ },
+ {
+ "epoch": 12.44,
+ "learning_rate": 1.6000000000000003e-05,
+ "loss": 2.5936,
+ "step": 420
+ },
+ {
+ "epoch": 13.04,
+ "learning_rate": 1.2e-05,
+ "loss": 2.5338,
+ "step": 440
+ },
+ {
+ "epoch": 13.63,
+ "learning_rate": 8.000000000000001e-06,
+ "loss": 2.3836,
+ "step": 460
+ },
+ {
+ "epoch": 14.22,
+ "learning_rate": 4.000000000000001e-06,
+ "loss": 2.3762,
+ "step": 480
+ },
+ {
+ "epoch": 14.81,
+ "learning_rate": 0.0,
+ "loss": 2.3408,
+ "step": 500
+ },
+ {
+ "epoch": 14.81,
+ "step": 500,
+ "total_flos": 8.933464866816e+16,
+ "train_loss": 3.714359001159668,
+ "train_runtime": 485.1916,
+ "train_samples_per_second": 65.953,
+ "train_steps_per_second": 1.031
+ }
+ ],
+ "max_steps": 500,
+ "num_train_epochs": 16,
+ "total_flos": 8.933464866816e+16,
+ "trial_name": null,
+ "trial_params": null
+ }
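Note: the learning rates logged every 20 steps above fall by 4e-06 per entry and reach 0.0 at step 500, which is what a linear decay from a peak of 1e-04 with no warmup over max_steps=500 would produce. The peak value and schedule type are inferred from the log, not stated in the commit; a small sketch under that assumption:

```python
# Reproduce the logged learning rates, assuming (not stated in the commit)
# a linear decay from a peak of 1e-4 to 0 over max_steps=500 with no warmup.
PEAK_LR = 1e-4   # assumed initial learning rate
MAX_STEPS = 500  # from trainer_state.json

def linear_lr(step: int) -> float:
    """Learning rate at a given optimizer step under linear decay."""
    return PEAK_LR * (1 - step / MAX_STEPS)

for step, logged in [(20, 9.6e-05), (100, 8e-05), (400, 2e-05), (500, 0.0)]:
    assert abs(linear_lr(step) - logged) < 1e-12
```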