AlekseyKorshuk committed
Commit c7221a1
1 Parent(s): fa424a4

huggingartists

README.md CHANGED
@@ -45,15 +45,15 @@ from datasets import load_dataset
 dataset = load_dataset("huggingartists/6ix9ine")
 ```
 
-[Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/33owzv4t/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
+[Explore the data](https://wandb.ai/huggingartists/huggingartists/runs/2myyormf/artifacts), which is tracked with [W&B artifacts](https://docs.wandb.com/artifacts) at every step of the pipeline.
 
 ## Training procedure
 
 The model is based on a pre-trained [GPT-2](https://huggingface.co/gpt2) which is fine-tuned on 6ix9ine's lyrics.
 
-Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/22q1oxft) for full transparency and reproducibility.
+Hyperparameters and metrics are recorded in the [W&B training run](https://wandb.ai/huggingartists/huggingartists/runs/7knioj7l) for full transparency and reproducibility.
 
-At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/22q1oxft/artifacts) is logged and versioned.
+At the end of training, [the final model](https://wandb.ai/huggingartists/huggingartists/runs/7knioj7l/artifacts) is logged and versioned.
 
 ## How to use
 
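For context on the truncated `## How to use` section above: huggingartists model cards typically pair the dataset snippet with a text-generation example. A minimal sketch, assuming the standard `transformers` pipeline API and this repo's model id (the usage code itself is not shown in the diff):

```python
from transformers import pipeline

# Load the fine-tuned GPT-2 checkpoint from this repo (assumed model id)
generator = pipeline("text-generation", model="huggingartists/6ix9ine")

# Sample a few lyric continuations from a short prompt
outputs = generator("I am", num_return_sequences=3, max_length=50, do_sample=True)
for out in outputs:
    print(out["generated_text"])
```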
evaluation.txt CHANGED
@@ -1 +1 @@
-{"eval_loss": 3.0470073223114014, "eval_runtime": 2.6704, "eval_samples_per_second": 20.596, "eval_steps_per_second": 2.621, "epoch": 2.0}
+{"eval_loss": 2.900149345397949, "eval_runtime": 2.5222, "eval_samples_per_second": 21.013, "eval_steps_per_second": 2.775, "epoch": 3.0}
flax_model.msgpack CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8df15ff3bf9d8a1748a052761e63c3b2f0e766961f7279a8de10378dfaeaf585
+oid sha256:e9509193c5fd5514fa34c5b8e4deef5d7ef27e6f54a88541e72dc9bbdcf0a7cd
 size 497764120
optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fb7c973b07fc7925a7773a7a9b8e96db55101b49d6bf109cfa76b63b9f7b450e
+oid sha256:6ab2bc41a7353a377e1364b89258475aeb37844383ea22bc18242bc8ff867965
 size 995603825
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fd1cf3ab818bb31d83b57f1db70e0d084389c521eecdf3cbb96f980dd4a48168
+oid sha256:30fde55c2cd1fd62cf61de771d05ea50c736a8b1286a06e577692582cfdeed20
 size 510403817
rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:29d7a8e82d5104f8855899362d2c29661c48c9cc84dceb8ac5bbc637db2337f5
+oid sha256:fc87f4591da50bc4de4f46a7c5f73ef224136f280b5366df533254560016512d
 size 14503
scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:00dd6b6c01d14ec8bb955c14a18bd11e818291c6d3cf3ac44048cedcc0186d76
+oid sha256:744fcc656fe1f449c2ea79ccccba224c3e3c1abb31179e9ce51f592461ac2c71
 size 623
trainer_state.json CHANGED
@@ -1,8 +1,8 @@
 {
-  "best_metric": 3.0470073223114014,
-  "best_model_checkpoint": "output/6ix9ine/checkpoint-84",
-  "epoch": 2.0,
-  "global_step": 84,
+  "best_metric": 2.900149345397949,
+  "best_model_checkpoint": "output/6ix9ine/checkpoint-126",
+  "epoch": 3.0,
+  "global_step": 126,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -118,11 +118,73 @@
       "eval_samples_per_second": 20.952,
       "eval_steps_per_second": 2.667,
       "step": 84
+    },
+    {
+      "epoch": 2.02,
+      "learning_rate": 1.9181951337103552e-07,
+      "loss": 2.7319,
+      "step": 85
+    },
+    {
+      "epoch": 2.14,
+      "learning_rate": 6.793535661894039e-06,
+      "loss": 2.7967,
+      "step": 90
+    },
+    {
+      "epoch": 2.26,
+      "learning_rate": 2.194015018891494e-05,
+      "loss": 2.9303,
+      "step": 95
+    },
+    {
+      "epoch": 2.38,
+      "learning_rate": 4.3537605728465236e-05,
+      "loss": 2.6571,
+      "step": 100
+    },
+    {
+      "epoch": 2.5,
+      "learning_rate": 6.859999999999997e-05,
+      "loss": 2.816,
+      "step": 105
+    },
+    {
+      "epoch": 2.62,
+      "learning_rate": 9.36623942715347e-05,
+      "loss": 2.8496,
+      "step": 110
+    },
+    {
+      "epoch": 2.74,
+      "learning_rate": 0.00011525984981108502,
+      "loss": 2.9189,
+      "step": 115
+    },
+    {
+      "epoch": 2.86,
+      "learning_rate": 0.00013040646433810595,
+      "loss": 2.8699,
+      "step": 120
+    },
+    {
+      "epoch": 2.98,
+      "learning_rate": 0.00013700818048662894,
+      "loss": 2.5821,
+      "step": 125
+    },
+    {
+      "epoch": 3.0,
+      "eval_loss": 2.900149345397949,
+      "eval_runtime": 2.5511,
+      "eval_samples_per_second": 20.775,
+      "eval_steps_per_second": 2.744,
+      "step": 126
     }
   ],
-  "max_steps": 84,
-  "num_train_epochs": 2,
-  "total_flos": 86226370560000.0,
+  "max_steps": 126,
+  "num_train_epochs": 3,
+  "total_flos": 129600847872000.0,
   "trial_name": null,
   "trial_params": null
 }
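The `trainer_state.json` above follows the layout written by the `transformers` Trainer, with the per-step entries living under `log_history`. A small sketch for pulling the best checkpoint and the logged training losses out of a local copy of the file (path assumed):

```python
import json

# Path to a locally downloaded copy of trainer_state.json (assumed)
with open("trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 2.900149345397949
print(state["best_model_checkpoint"])  # output/6ix9ine/checkpoint-126

# Entries with a "loss" key are logged training steps;
# entries with "eval_loss" are the end-of-epoch evaluations.
for entry in state["log_history"]:
    if "loss" in entry:
        print(entry["step"], entry["loss"])
```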
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cd271d0572a7a3468694d79a611ddcf794549420f744198cdf5961c723dc8d78
+oid sha256:7e93bb873917a9ff90818f9d9ee0e70a0a0c0493fe6d3d599f966dfc8cd40a4e
 size 2863
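All of the binary files in this commit (`flax_model.msgpack`, `optimizer.pt`, `pytorch_model.bin`, `rng_state.pth`, `scheduler.pt`, `training_args.bin`) are stored as Git LFS pointers: a `version` line, the `oid sha256:` of the real blob, and its `size` in bytes. A minimal sketch of checking a downloaded file against its pointer, assuming the blob has already been fetched locally:

```python
import hashlib
import os

def matches_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the local file has the size and sha256 recorded in its LFS pointer."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Example: the new training_args.bin from this commit
# matches_lfs_pointer(
#     "training_args.bin",
#     "7e93bb873917a9ff90818f9d9ee0e70a0a0c0493fe6d3d599f966dfc8cd40a4e",
#     2863,
# )
```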